Code
# Import ------------------------------------------------
from BI import bi, jnp
# Setup device------------------------------------------------
m = bi(platform='cpu', rand_seed = False)
# Simulate data ------------------------------------------------
N = 50
individual_predictor = m.dist.normal(0,1, shape = (N,1), sample = True)
# Binary kinship matrix with no self-ties (zero diagonal)
kinship = m.dist.bernoulli(0.3, shape = (N,N), sample = True)
kinship = kinship.at[jnp.diag_indices(N)].set(0)
# Assign each individual to one of four equally likely categories
category = m.dist.categorical(jnp.array([.25,.25,.25,.25]), sample = True, shape = (N,))
# Number of groups and group sizes
N_grp, N_by_grp = jnp.unique(category, return_counts=True)
N_grp = N_grp.shape[0]
def sim_network(kinship, individual_predictor, category):
    # Intercept block (a single group containing all N individuals)
    B_intercept = m.net.block_model(jnp.full((N,), 0), 1, N, sample = True)
    # Category block (group-level tie effects)
    B_category = m.net.block_model(category, N_grp, N_by_grp, sample = True)
    # Sender-receiver effects
    sr = m.net.sender_receiver(
        individual_predictor,
        individual_predictor,
        s_mu = 0.4, r_mu = -0.4, sample = True)
    # Dyadic effects
    DR = m.net.dyadic_effect(kinship, d_sd=2.5, sample = True)
    return m.dist.bernoulli(
        logits = B_intercept + B_category + sr + DR,
        sample = True
    )
network = sim_network(m.net.mat_to_edgl(kinship), individual_predictor, category)
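# Optional sanity check (a sketch, not part of the original workflow): assuming
# `network` is an array of simulated 0/1 dyad outcomes, its mean is the simulated tie density.
print("Simulated tie density:", jnp.mean(network))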
# Predictive model ------------------------------------------------
m.data_on_model = dict(
network = network,
dyadic_predictors = m.net.mat_to_edgl(kinship),
focal_individual_predictors = individual_predictor,
target_individual_predictors = individual_predictor,
category = category
)
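# Note: the keys of `data_on_model` are assumed to be matched by name to the
# arguments of `model()` defined below.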
def model(network, dyadic_predictors, focal_individual_predictors, target_individual_predictors, category):
    N_id = focal_individual_predictors.shape[0]
    # Block effects ---------------------------------------
    B_intercept = m.net.block_model(jnp.full((N_id,), 0), 1, N_id, name = "B_intercept")
    B_category = m.net.block_model(category, N_grp, N_by_grp, name = "B_category")
    # Sender-receiver effects, shape = N individuals -------
    sr = m.net.sender_receiver(
        focal_individual_predictors,
        target_individual_predictors,
        s_mu = 0.4, r_mu = -0.4
    )
    # Dyadic effects, shape = N dyads ----------------------
    dr = m.net.dyadic_effect(dyadic_predictors, d_sd=2.5)  # Dyadic effect, intercept only
    # Bernoulli likelihood on the observed edge list
    m.dist.bernoulli(logits = B_intercept + B_category + sr + dr, obs=network)
m.fit(model, num_samples = 500, num_warmup = 500, num_chains = 1, thinning = 1)
m.summary()
sample: 100%|██████████| 1000/1000 [00:05<00:00, 176.32it/s, 63 steps of size 8.49e-02. acc. prob=0.94]
arviz - WARNING - Shape validation failed: input_shape: (1, 500), minimum_shape: (chains=2, draws=4)
/home/sosa/work/3.12venv/lib/python3.12/site-packages/arviz/stats/diagnostics.py:991: RuntimeWarning:
invalid value encountered in scalar divide
| parameter | mean | sd | hdi_5.5% | hdi_94.5% | mcse_mean | mcse_sd | ess_bulk | ess_tail | r_hat |
|---|---|---|---|---|---|---|---|---|---|
| b_B_category[0, 0] | -5.00 | 2.07 | -8.57 | -2.06 | 0.07 | 0.09 | 923.76 | 426.62 | NaN |
| b_B_category[0, 1] | -6.59 | 2.28 | -10.12 | -2.80 | 0.06 | 0.10 | 1349.49 | 453.84 | NaN |
| b_B_category[0, 2] | -6.65 | 2.38 | -10.36 | -3.05 | 0.07 | 0.12 | 1179.34 | 284.27 | NaN |
| b_B_category[0, 3] | -6.22 | 2.37 | -9.77 | -2.31 | 0.07 | 0.13 | 1057.71 | 264.83 | NaN |
| b_B_category[1, 0] | -7.20 | 2.08 | -10.24 | -3.82 | 0.06 | 0.09 | 1349.49 | 395.60 | NaN |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| sr_rf[48, 1] | -0.05 | 1.33 | -1.80 | 2.40 | 0.04 | 0.11 | 1349.49 | 247.22 | NaN |
| sr_rf[49, 0] | 0.03 | 1.79 | -2.79 | 2.67 | 0.07 | 0.10 | 748.76 | 384.71 | NaN |
| sr_rf[49, 1] | -1.03 | 1.20 | -2.95 | 0.75 | 0.07 | 0.05 | 290.28 | 453.84 | NaN |
| sr_sigma[0] | 1.64 | 0.58 | 0.75 | 2.57 | 0.03 | 0.02 | 349.74 | 428.71 | NaN |
| sr_sigma[1] | 1.32 | 0.65 | 0.30 | 2.37 | 0.05 | 0.02 | 178.29 | 117.33 | NaN |
5131 rows × 9 columns
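The `r_hat` column is `NaN` because only a single chain was run; as the arviz warning above notes, that diagnostic requires at least two chains. A minimal follow-up sketch, reusing the same `m.fit` and `m.summary` calls from above with `num_chains = 2` (roughly doubling run time on a single device):

m.fit(model, num_samples = 500, num_warmup = 500, num_chains = 2, thinning = 1)
m.summary()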